A Bastion Server (jump host) is an EC2 instance in a public subnet that provides secure remote access to resources in private subnets. It acts as the entry point into the private network and is used to manage and maintain internal resources such as database servers and application servers.
A NAT (Network Address Translation) Server is also an EC2 instance in a public subnet; it allows resources in private subnets to reach the Internet. It acts as a proxy for outbound traffic, forwarding network requests from private-subnet resources to the Internet and tracking the state of those connections.
This post implements the Terraform module that creates these two EC2 instances on AWS. The complete project code is shared on my GitHub.
The module is implemented in the ./modules/my_instances directory; the project layout is:
├── configs
│ ├── subnet
│ │ └── my-subnets.yaml
│ └── vpc
│ └── my-vpcs.yaml
├── example.tfvars
├── locals.tf
├── main.tf
├── modules
│ ├── my_igw
│ ├── my_instances
│ │ ├── configure_nat.sh
│ │ ├── data.aws_ami.ami.tf
│ │ ├── instance.tf
│ │ ├── outputs.tf
│ │ ├── provider.tf
│ │ └── variables.tf
│ ├── my_nacls
│ ├── my_subnets
│ └── my_vpc
└── variables.tf
The my_instances module
./modules/my_instances/outputs.tf:
output "bastion_instance_id" {
value = aws_instance.bastion_instance.id
}
output "nat_server_instance_id" {
value = one(aws_instance.nat_server_instance[*].id)
}
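The module only exports the two instance IDs. If you want private subnets to actually route through the NAT Server, a route is still needed; the sketch below is hypothetical and not part of this project. It assumes the module gains an extra output nat_server_primary_network_interface_id (for example one(aws_instance.nat_server_instance[*].primary_network_interface_id)) and that a private route table aws_route_table.my_private_rt exists in the root module; recent AWS provider versions expect a network interface ID here rather than an instance ID.
# Hypothetical default route for private subnets via the NAT Server's primary ENI.
resource "aws_route" "private_default_via_nat" {
  route_table_id         = aws_route_table.my_private_rt.id
  destination_cidr_block = "0.0.0.0/0"
  network_interface_id   = module.instances.nat_server_primary_network_interface_id
}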
./modules/my_instances/provider.tf: for an explanation of each input variable, refer to its description field.
provider "aws" {
region = var.aws_region
profile = var.aws_profile
}
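The provider configuration lives inside the module. If you also want to pin the Terraform and provider versions at the module level, a possible companion versions.tf (not part of this project; the constraints are assumptions) could look like this:
# Hypothetical versions.tf for the module; version constraints are assumptions.
terraform {
  required_version = ">= 1.5.0"
  required_providers {
    aws = {
      source  = "hashicorp/aws"
      version = "~> 5.0"
    }
  }
}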
./modules/my_instances/variables.tf:
variable "aws_region" {
description = "AWS region"
default = "ap-northeast-1"
}
variable "aws_profile" {
description = "AWS profile"
default = ""
}
variable "project_name" {
type = string
description = "Project name"
default = ""
}
variable "department_name" {
type = string
description = "Department name"
default = "SRE"
}
variable "instance_type" {
type = string
description = "The instance type of Bastion Server and NAT Server"
default = "t3.small"
}
variable "ssh_key_name" {
type = string
description = "The SSH key of Bastion Server and NAT Server"
default = ""
}
variable "bastion_security_group_ids" {
type = list(string)
description = "The security groups of Bastion Server"
default = []
}
variable "nat_server_security_group_ids" {
type = list(string)
description = "The security groups of NAT Server"
default = []
}
variable "subnet_bastion_id" {
type = string
description = "The subnet id of Bastion Server"
default = ""
}
variable "subnet_nat_server_id" {
type = string
description = "The subnet id of NAT Server"
default = ""
}
variable "create_nat_server_instance" {
type = bool
description = "Condition for creating NAT Server or not"
default = true
}
variable "bastion_ami_id" {
type = string
description = "Specific AMI id for Bastion Server"
default = null
}
variable "nat_server_ami_id" {
type = string
description = "Specific AMI id for NAT Server"
default = null
}
variable "bastion_user_data" {
type = string
description = "The user data of Bastion Server"
default = null
}
variable "bastion_launch_template" {
type = map(any)
description = "The launch template of Bastion Server"
default = null
}
variable "bastion_ami" {
type = map(any)
description = "The AMI search criteria of Bastion Server"
default = null
}
variable "nat_server_launch_template" {
type = map(any)
description = "The launch template of NAT Server"
default = null
}
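Most of these variables default to an empty string or null, so a missing value only shows up at apply time. As an optional hardening idea (not how this module is written), a validation block can surface the problem during plan; a minimal sketch for the SSH key:
# Hypothetical stricter variant of ssh_key_name: no default, plus a validation rule.
variable "ssh_key_name" {
  type        = string
  description = "The SSH key of Bastion Server and NAT Server"

  validation {
    condition     = length(trimspace(var.ssh_key_name)) > 0
    error_message = "ssh_key_name must not be empty."
  }
}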
./modules/my_instances/configure_nat.sh: defines the user data that runs when the NAT Server boots; its purpose is to enable network packet forwarding.
#!/bin/bash
sysctl -w net.ipv4.ip_forward=1
/sbin/iptables -t nat -A POSTROUTING -o eth0 -j MASQUERADE
yum install -y iptables-services
service iptables save
./modules/my_instances/data.aws_ami.ami.tf: defines the default AMI search criteria.
data "aws_ami" "ami" {
most_recent = true
filter {
name = "name"
values = ["amzn2-ami-hvm-*-x86_64-ebs"]
}
filter {
name = "root-device-type"
values = ["ebs"]
}
filter {
name = "virtualization-type"
values = ["hvm"]
}
owners = ["amazon"]
}
data "aws_ami" "bastion_ami" {
most_recent = true
filter {
name = "name"
values = var.bastion_ami["filter"]
}
owners = var.bastion_ami["owners"]
}
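One caveat: data sources are always evaluated, so leaving var.bastion_ami at its null default would make var.bastion_ami["filter"] fail at plan time even when bastion_ami_id is set. A more defensive variant (an assumption, not what the project does) could fall back to the same defaults as data.aws_ami.ami:
# Hypothetical defensive lookup: fall back to the Amazon Linux 2 defaults when
# var.bastion_ami is null or missing keys.
data "aws_ami" "bastion_ami" {
  most_recent = true
  filter {
    name   = "name"
    values = try(var.bastion_ami["filter"], ["amzn2-ami-hvm-*-x86_64-ebs"])
  }
  owners = try(var.bastion_ami["owners"], ["amazon"])
}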
./modules/my_instances/instance.tf:
data "local_file" "configure_nat" {
filename = "${path.module}/configure_nat.sh"
}
resource "aws_instance" "bastion_instance" {
ami = (var.bastion_ami_id != null) ? var.bastion_ami_id : data.aws_ami.bastion_ami.id
instance_type = var.instance_type
key_name = var.ssh_key_name
vpc_security_group_ids = var.bastion_security_group_ids
subnet_id = var.subnet_bastion_id
user_data = var.bastion_user_data
dynamic "launch_template" {
for_each = (var.bastion_launch_template != null) ? [var.bastion_launch_template] : []
content {
name = launch_template.value.name
version = launch_template.value.version
}
}
tags = {
Department = var.department_name
Project = var.project_name
Name = "Bastion"
"Prometheus-monitor" = "enabled"
}
}
resource "aws_instance" "nat_server_instance" {
count = var.create_nat_server_instance ? 1 : 0
ami = (var.nat_server_ami_id != null) ? var.nat_server_ami_id : data.aws_ami.ami.id
instance_type = var.instance_type
key_name = var.ssh_key_name
vpc_security_group_ids = var.nat_server_security_group_ids
source_dest_check = false
subnet_id = var.subnet_nat_server_id
user_data_base64 = data.local_file.configure_nat.content_base64
dynamic "launch_template" {
for_each = (var.nat_server_launch_template != null) ? [var.nat_server_launch_template] : []
content {
name = launch_template.value.name
version = launch_template.value.version
}
}
tags = {
Department = var.department_name
Project = var.project_name
Name = "NAT Server"
"Prometheus-monitor" = "enabled"
}
}
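A small design note: the data "local_file" lookup works, but Terraform's built-in file functions can do the same job without an extra data source. As a hedged alternative (not what the module uses), the NAT Server's user data attribute could be written as:
# Hypothetical alternative to data.local_file.configure_nat inside the
# aws_instance.nat_server_instance resource:
user_data_base64 = filebase64("${path.module}/configure_nat.sh")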
example.tfvars: specify the name of the SSH key used by the Bastion and NAT Server you create.
aws_region="ap-northeast-1"
aws_profile="<YOUR_PROFILE>"
project_name="example"
department_name="SRE"
ssh_key_name="<YOUR_SSH_KEY>"
locals.tf: local values used when creating the Bastion Server.
bastion_allowed_ips: for security, make sure this list contains only the IPs allowed to SSH into the Bastion Server.
bastion_ami: the Name and Owner criteria used to search for the Bastion Server's AMI; change them if you want to use an AMI you built yourself.
locals {
bastion_allowed_ips = [
"0.0.0.0/0"
]
bastion_ami = {
filter = ["amzn2-ami-hvm-*-x86_64-ebs"]
owners = ["amazon"]
}
}
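The 0.0.0.0/0 entry above is only a placeholder; in practice, replace it with the specific addresses you connect from, for example (a made-up documentation IP):
bastion_allowed_ips = [
  "203.0.113.10/32" # example address only; use your own public IP(s)
]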
main.tf: also creates the my_bastion_sg and my_nat_server_sg security group resources.
terraform {
required_providers {
aws = {
version = "5.15.0"
}
}
backend "s3" {
bucket = "<YOUR_S3_BUCKET_NAME>"
dynamodb_table = "<YOUR_DYNAMODB_TABLE_NAME>"
key = "terraform.tfstate"
region = "ap-northeast-1"
shared_credentials_file = "~/.aws/config"
profile = "<YOUR_PROFILE>"
}
}
# vpc
module "vpc" {
aws_profile = var.aws_profile
aws_region = var.aws_region
department_name = var.department_name
project_name = var.project_name
vpc_path = "./configs/vpc/my-vpcs.yaml"
source = "./modules/my_vpc"
}
# subnet
module "subnet" {
aws_profile = var.aws_profile
aws_region = var.aws_region
department_name = var.department_name
project_name = var.project_name
vpc_id = module.vpc.my_vpcs["my-vpc"].id
subnet_path = "./configs/subnet/my-subnets.yaml"
source = "./modules/my_subnets"
}
module "igw" {
aws_profile = var.aws_profile
aws_region = var.aws_region
department_name = var.department_name
project_name = var.project_name
vpc_id = module.vpc.my_vpcs["my-vpc"].id
source = "./modules/my_igw"
}
# nacl
module "nacl" {
# checkov:skip=CKV_AWS_230: check it later
# checkov:skip=CKV_AWS_229: check it later
# checkov:skip=CKV_AWS_232: check it later
# checkov:skip=CKV_AWS_231: check it later
aws_profile = var.aws_profile
aws_region = var.aws_region
department_name = var.department_name
project_name = var.project_name
vpc_cidr = module.vpc.my_vpcs["my-vpc"].cidr_block
vpc_id = module.vpc.my_vpcs["my-vpc"].id
subnet_public_a_id = module.subnet.subnets["my-public-ap-northeast-1a"].id
subnet_public_c_id = module.subnet.subnets["my-public-ap-northeast-1c"].id
subnet_public_d_id = module.subnet.subnets["my-public-ap-northeast-1d"].id
subnet_application_a_id = module.subnet.subnets["my-application-ap-northeast-1a"].id
subnet_application_c_id = module.subnet.subnets["my-application-ap-northeast-1c"].id
subnet_application_d_id = module.subnet.subnets["my-application-ap-northeast-1d"].id
subnet_intra_a_id = module.subnet.subnets["my-intra-ap-northeast-1a"].id
subnet_intra_c_id = module.subnet.subnets["my-intra-ap-northeast-1c"].id
subnet_intra_d_id = module.subnet.subnets["my-intra-ap-northeast-1d"].id
subnet_persistence_a_id = module.subnet.subnets["my-persistence-ap-northeast-1a"].id
subnet_persistence_c_id = module.subnet.subnets["my-persistence-ap-northeast-1c"].id
subnet_persistence_d_id = module.subnet.subnets["my-persistence-ap-northeast-1d"].id
subnet_nat_server_id = module.subnet.subnets["my-nat-server"].id
source = "./modules/my_nacls"
}
resource "aws_security_group" "nxd_bastion_sg" {
description = "Used for bastion instance public"
ingress {
cidr_blocks = local.bastion_allowed_ips
description = "ssh from allowed ips"
from_port = 22
to_port = 22
protocol = "tcp"
}
egress {
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
ipv6_cidr_blocks = ["::/0"]
}
name = "bastion-sg"
tags = {
Department = var.department_name
Name = "Bastion-SG"
Project = var.project_name
}
tags_all = {
Department = var.department_name
Name = "Bastion-SG"
Project = var.project_name
}
vpc_id = module.vpc.my_vpcs["my-vpc"].id
}
resource "aws_security_group" "nxd_nat_server_sg" {
description = "Used for NAT instance public"
egress {
cidr_blocks = ["0.0.0.0/0"]
from_port = "0"
ipv6_cidr_blocks = ["::/0"]
protocol = "-1"
self = "false"
to_port = "0"
}
ingress {
cidr_blocks = [module.vpc.my_vpcs["my-vpc"].cidr_block]
from_port = "0"
protocol = "-1"
self = "false"
to_port = "0"
}
name = "nat-server-sg"
tags = {
Department = var.department_name
Name = "NAT-Server-SG"
Project = var.project_name
}
tags_all = {
Department = var.department_name
Name = "NAT-Server-SG"
Project = var.project_name
}
vpc_id = module.vpc.my_vpcs["my-vpc"].id
}
# instances
module "instances" {
aws_profile = var.aws_profile
aws_region = var.aws_region
department_name = var.department_name
project_name = var.project_name
instance_type = "t3a.small"
subnet_bastion_id = module.subnet.subnets["my-public-ap-northeast-1d"].id
subnet_nat_server_id = module.subnet.subnets["my-nat-server"].id
bastion_security_group_ids = [aws_security_group.my_bastion_sg.id]
nat_server_security_group_ids = [aws_security_group.my_nat_server_sg.id]
ssh_key_name = var.ssh_key_name
bastion_ami = local.bastion_ami
bastion_ami_id = null
nat_server_ami_id = null
create_nat_server_instance = true
bastion_launch_template = null
bastion_user_data = <<HERE
#!/bin/bash
echo "Do something you want here."
HERE
source = "./modules/my_instances"
}
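One more note: in the plan output below the public subnets have map_public_ip_on_launch = false, so neither instance automatically receives a public IP. A common way to handle this, not included in this project, is to attach an Elastic IP to the Bastion (and similarly to the NAT Server), for example:
# Hypothetical Elastic IP for the Bastion Server, using the module output.
resource "aws_eip" "bastion" {
  domain   = "vpc"
  instance = module.instances.bastion_instance_id

  tags = {
    Department = var.department_name
    Name       = "Bastion-EIP"
    Project    = var.project_name
  }
}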
In the project directory, run terraform init && terraform plan --out .plan -var-file=example.tfvars to check the result:
Terraform used the selected providers to generate the following execution
plan. Resource actions are indicated with the following symbols:
+ create
Terraform will perform the following actions:
# aws_security_group.my_bastion_sg will be created
+ resource "aws_security_group" "my_bastion_sg" {
+ arn = (known after apply)
+ description = "Used for bastion instance public"
+ egress = [
+ {
+ cidr_blocks = [
+ "0.0.0.0/0",
]
+ description = ""
+ from_port = 0
+ ipv6_cidr_blocks = [
+ "::/0",
]
+ prefix_list_ids = []
+ protocol = "-1"
+ security_groups = []
+ self = false
+ to_port = 0
},
]
+ id = (known after apply)
+ ingress = [
+ {
+ cidr_blocks = [
+ "114.34.61.84/32",
]
+ description = "ssh from allowed ips"
+ from_port = 22
+ ipv6_cidr_blocks = []
+ prefix_list_ids = []
+ protocol = "tcp"
+ security_groups = []
+ self = false
+ to_port = 22
},
]
+ name = "bastion-sg"
+ name_prefix = (known after apply)
+ owner_id = (known after apply)
+ revoke_rules_on_delete = false
+ tags = {
+ "Department" = "SRE"
+ "Name" = "Bastion-SG"
+ "Project" = "example"
}
+ tags_all = {
+ "Department" = "SRE"
+ "Name" = "Bastion-SG"
+ "Project" = "example"
}
+ vpc_id = (known after apply)
}
# aws_security_group.my_nat_server_sg will be created
+ resource "aws_security_group" "my_nat_server_sg" {
+ arn = (known after apply)
+ description = "Used for NAT instance public"
+ egress = [
+ {
+ cidr_blocks = [
+ "0.0.0.0/0",
]
+ description = ""
+ from_port = 0
+ ipv6_cidr_blocks = [
+ "::/0",
]
+ prefix_list_ids = []
+ protocol = "-1"
+ security_groups = []
+ self = false
+ to_port = 0
},
]
+ id = (known after apply)
+ ingress = [
+ {
+ cidr_blocks = [
+ "10.2.0.0/16",
]
+ description = ""
+ from_port = 0
+ ipv6_cidr_blocks = []
+ prefix_list_ids = []
+ protocol = "-1"
+ security_groups = []
+ self = false
+ to_port = 0
},
]
+ name = "nat-server-sg"
+ name_prefix = (known after apply)
+ owner_id = (known after apply)
+ revoke_rules_on_delete = false
+ tags = {
+ "Department" = "SRE"
+ "Name" = "NAT-Server-SG"
+ "Project" = "example"
}
+ tags_all = {
+ "Department" = "SRE"
+ "Name" = "NAT-Server-SG"
+ "Project" = "example"
}
+ vpc_id = (known after apply)
}
# module.igw.aws_internet_gateway.my_igw will be created
+ resource "aws_internet_gateway" "my_igw" {
+ arn = (known after apply)
+ id = (known after apply)
+ owner_id = (known after apply)
+ tags = {
+ "Department" = "SRE"
+ "Name" = "example-igw"
+ "Project" = "example"
}
+ tags_all = {
+ "Department" = "SRE"
+ "Name" = "example-igw"
+ "Project" = "example"
}
+ vpc_id = (known after apply)
}
# module.instances.aws_instance.bastion_instance will be created
+ resource "aws_instance" "bastion_instance" {
+ ami = "ami-0f419d2f905bb344e"
+ arn = (known after apply)
+ associate_public_ip_address = (known after apply)
+ availability_zone = (known after apply)
+ cpu_core_count = (known after apply)
+ cpu_threads_per_core = (known after apply)
+ disable_api_stop = (known after apply)
+ disable_api_termination = (known after apply)
+ ebs_optimized = (known after apply)
+ get_password_data = false
+ host_id = (known after apply)
+ host_resource_group_arn = (known after apply)
+ iam_instance_profile = (known after apply)
+ id = (known after apply)
+ instance_initiated_shutdown_behavior = (known after apply)
+ instance_lifecycle = (known after apply)
+ instance_state = (known after apply)
+ instance_type = "t3a.small"
+ ipv6_address_count = (known after apply)
+ ipv6_addresses = (known after apply)
+ key_name = "my-ssh-key"
+ monitoring = (known after apply)
+ outpost_arn = (known after apply)
+ password_data = (known after apply)
+ placement_group = (known after apply)
+ placement_partition_number = (known after apply)
+ primary_network_interface_id = (known after apply)
+ private_dns = (known after apply)
+ private_ip = (known after apply)
+ public_dns = (known after apply)
+ public_ip = (known after apply)
+ secondary_private_ips = (known after apply)
+ security_groups = (known after apply)
+ source_dest_check = true
+ spot_instance_request_id = (known after apply)
+ subnet_id = (known after apply)
+ tags = {
+ "Department" = "SRE"
+ "Name" = "Bastion"
+ "Project" = "example"
+ "Prometheus-monitor" = "enabled"
}
+ tags_all = {
+ "Department" = "SRE"
+ "Name" = "Bastion"
+ "Project" = "example"
+ "Prometheus-monitor" = "enabled"
}
+ tenancy = (known after apply)
+ user_data = "337ac5cb1a9b05f5b460f72f8cd79ae54a7f22f4"
+ user_data_base64 = (known after apply)
+ user_data_replace_on_change = false
+ vpc_security_group_ids = (known after apply)
}
# module.instances.aws_instance.nat_server_instance[0] will be created
+ resource "aws_instance" "nat_server_instance" {
+ ami = "ami-0f419d2f905bb344e"
+ arn = (known after apply)
+ associate_public_ip_address = (known after apply)
+ availability_zone = (known after apply)
+ cpu_core_count = (known after apply)
+ cpu_threads_per_core = (known after apply)
+ disable_api_stop = (known after apply)
+ disable_api_termination = (known after apply)
+ ebs_optimized = (known after apply)
+ get_password_data = false
+ host_id = (known after apply)
+ host_resource_group_arn = (known after apply)
+ iam_instance_profile = (known after apply)
+ id = (known after apply)
+ instance_initiated_shutdown_behavior = (known after apply)
+ instance_lifecycle = (known after apply)
+ instance_state = (known after apply)
+ instance_type = "t3a.small"
+ ipv6_address_count = (known after apply)
+ ipv6_addresses = (known after apply)
+ key_name = "my-ssh-key"
+ monitoring = (known after apply)
+ outpost_arn = (known after apply)
+ password_data = (known after apply)
+ placement_group = (known after apply)
+ placement_partition_number = (known after apply)
+ primary_network_interface_id = (known after apply)
+ private_dns = (known after apply)
+ private_ip = (known after apply)
+ public_dns = (known after apply)
+ public_ip = (known after apply)
+ secondary_private_ips = (known after apply)
+ security_groups = (known after apply)
+ source_dest_check = false
+ spot_instance_request_id = (known after apply)
+ subnet_id = (known after apply)
+ tags = {
+ "Department" = "SRE"
+ "Name" = "NAT Server"
+ "Project" = "example"
+ "Prometheus-monitor" = "enabled"
}
+ tags_all = {
+ "Department" = "SRE"
+ "Name" = "NAT Server"
+ "Project" = "example"
+ "Prometheus-monitor" = "enabled"
}
+ tenancy = (known after apply)
+ user_data = (known after apply)
+ user_data_base64 = "IyEvYmluL2Jhc2gKc3lzY3RsIC13IG5ldC5pcHY0LmlwX2ZvcndhcmQ9MQovc2Jpbi9pcHRhYmxlcyAtdCBuYXQgLUEgUE9TVFJPVVRJTkcgLW8gZXRoMCAtaiBNQVNRVUVSQURFCnl1bSBpbnN0YWxsIC15IGlwdGFibGVzLXNlcnZpY2VzCnNlcnZpY2UgaXB0YWJsZXMgc2F2ZQo="
+ user_data_replace_on_change = false
+ vpc_security_group_ids = (known after apply)
}
# module.nacl.aws_network_acl.my_application_acl will be created
+ resource "aws_network_acl" "my_application_acl" {
+ arn = (known after apply)
+ egress = [
+ {
+ action = "allow"
+ cidr_block = "0.0.0.0/0"
+ from_port = 0
+ icmp_code = -1
+ icmp_type = -1
+ ipv6_cidr_block = ""
+ protocol = "1"
+ rule_no = 140
+ to_port = 0
},
+ {
+ action = "allow"
+ cidr_block = "0.0.0.0/0"
+ from_port = 0
+ icmp_code = 0
+ icmp_type = 0
+ ipv6_cidr_block = ""
+ protocol = "-1"
+ rule_no = 1
+ to_port = 0
},
+ {
+ action = "allow"
+ cidr_block = "0.0.0.0/0"
+ from_port = 1024
+ icmp_code = 0
+ icmp_type = 0
+ ipv6_cidr_block = ""
+ protocol = "6"
+ rule_no = 130
+ to_port = 65535
},
+ {
+ action = "allow"
+ cidr_block = "0.0.0.0/0"
+ from_port = 22
+ icmp_code = 0
+ icmp_type = 0
+ ipv6_cidr_block = ""
+ protocol = "6"
+ rule_no = 802
+ to_port = 22
},
+ {
+ action = "allow"
+ cidr_block = "0.0.0.0/0"
+ from_port = 443
+ icmp_code = 0
+ icmp_type = 0
+ ipv6_cidr_block = ""
+ protocol = "6"
+ rule_no = 110
+ to_port = 443
},
+ {
+ action = "allow"
+ cidr_block = "0.0.0.0/0"
+ from_port = 80
+ icmp_code = 0
+ icmp_type = 0
+ ipv6_cidr_block = ""
+ protocol = "6"
+ rule_no = 100
+ to_port = 80
},
+ {
+ action = "allow"
+ cidr_block = "10.2.0.0/16"
+ from_port = 23
+ icmp_code = 0
+ icmp_type = 0
+ ipv6_cidr_block = ""
+ protocol = "6"
+ rule_no = 900
+ to_port = 65535
},
]
+ id = (known after apply)
+ ingress = [
+ {
+ action = "allow"
+ cidr_block = "0.0.0.0/0"
+ from_port = 0
+ icmp_code = -1
+ icmp_type = -1
+ ipv6_cidr_block = ""
+ protocol = "1"
+ rule_no = 140
+ to_port = 0
},
+ {
+ action = "allow"
+ cidr_block = "0.0.0.0/0"
+ from_port = 0
+ icmp_code = 0
+ icmp_type = 0
+ ipv6_cidr_block = ""
+ protocol = "-1"
+ rule_no = 1002
+ to_port = 0
},
+ {
+ action = "allow"
+ cidr_block = "0.0.0.0/0"
+ from_port = 0
+ icmp_code = 0
+ icmp_type = 0
+ ipv6_cidr_block = ""
+ protocol = "17"
+ rule_no = 1
+ to_port = 65535
},
+ {
+ action = "allow"
+ cidr_block = "0.0.0.0/0"
+ from_port = 1024
+ icmp_code = 0
+ icmp_type = 0
+ ipv6_cidr_block = ""
+ protocol = "6"
+ rule_no = 130
+ to_port = 65535
},
+ {
+ action = "allow"
+ cidr_block = "0.0.0.0/0"
+ from_port = 22
+ icmp_code = 0
+ icmp_type = 0
+ ipv6_cidr_block = ""
+ protocol = "6"
+ rule_no = 1000
+ to_port = 22
},
+ {
+ action = "allow"
+ cidr_block = "10.2.0.0/16"
+ from_port = 22
+ icmp_code = 0
+ icmp_type = 0
+ ipv6_cidr_block = ""
+ protocol = "6"
+ rule_no = 120
+ to_port = 22
},
+ {
+ action = "allow"
+ cidr_block = "10.2.0.0/16"
+ from_port = 23
+ icmp_code = 0
+ icmp_type = 0
+ ipv6_cidr_block = ""
+ protocol = "6"
+ rule_no = 900
+ to_port = 65535
},
+ {
+ action = "allow"
+ cidr_block = "10.2.0.0/16"
+ from_port = 80
+ icmp_code = 0
+ icmp_type = 0
+ ipv6_cidr_block = ""
+ protocol = "6"
+ rule_no = 999
+ to_port = 80
},
]
+ owner_id = (known after apply)
+ subnet_ids = (known after apply)
+ tags = {
+ "Department" = "SRE"
+ "Name" = "example-application"
+ "Project" = "example"
}
+ tags_all = {
+ "Department" = "SRE"
+ "Name" = "example-application"
+ "Project" = "example"
}
+ vpc_id = (known after apply)
}
# module.nacl.aws_network_acl.my_nat_acl will be created
+ resource "aws_network_acl" "my_nat_acl" {
+ arn = (known after apply)
+ egress = [
+ {
+ action = "allow"
+ cidr_block = "0.0.0.0/0"
+ from_port = 0
+ icmp_code = 0
+ icmp_type = 0
+ ipv6_cidr_block = ""
+ protocol = "-1"
+ rule_no = 100
+ to_port = 0
},
]
+ id = (known after apply)
+ ingress = [
+ {
+ action = "allow"
+ cidr_block = "0.0.0.0/0"
+ from_port = 0
+ icmp_code = 0
+ icmp_type = 0
+ ipv6_cidr_block = ""
+ protocol = "-1"
+ rule_no = 100
+ to_port = 0
},
]
+ owner_id = (known after apply)
+ subnet_ids = (known after apply)
+ tags = {
+ "Department" = "SRE"
+ "Name" = "example-nat"
+ "Project" = "example"
}
+ tags_all = {
+ "Department" = "SRE"
+ "Name" = "example-nat"
+ "Project" = "example"
}
+ vpc_id = (known after apply)
}
# module.nacl.aws_network_acl.my_persistence_acl will be created
+ resource "aws_network_acl" "my_persistence_acl" {
+ arn = (known after apply)
+ egress = [
+ {
+ action = "allow"
+ cidr_block = "0.0.0.0/0"
+ from_port = 32768
+ icmp_code = 0
+ icmp_type = 0
+ ipv6_cidr_block = ""
+ protocol = "6"
+ rule_no = 130
+ to_port = 65535
},
+ {
+ action = "allow"
+ cidr_block = "0.0.0.0/0"
+ from_port = 443
+ icmp_code = 0
+ icmp_type = 0
+ ipv6_cidr_block = ""
+ protocol = "6"
+ rule_no = 110
+ to_port = 443
},
+ {
+ action = "allow"
+ cidr_block = "0.0.0.0/0"
+ from_port = 80
+ icmp_code = 0
+ icmp_type = 0
+ ipv6_cidr_block = ""
+ protocol = "6"
+ rule_no = 100
+ to_port = 80
},
+ {
+ action = "allow"
+ cidr_block = "10.2.0.0/16"
+ from_port = 0
+ icmp_code = -1
+ icmp_type = 0
+ ipv6_cidr_block = ""
+ protocol = "1"
+ rule_no = 141
+ to_port = 0
},
+ {
+ action = "allow"
+ cidr_block = "10.2.0.0/16"
+ from_port = 0
+ icmp_code = -1
+ icmp_type = 8
+ ipv6_cidr_block = ""
+ protocol = "1"
+ rule_no = 140
+ to_port = 0
},
+ {
+ action = "allow"
+ cidr_block = "10.2.0.0/16"
+ from_port = 23
+ icmp_code = 0
+ icmp_type = 0
+ ipv6_cidr_block = ""
+ protocol = "6"
+ rule_no = 900
+ to_port = 65535
},
]
+ id = (known after apply)
+ ingress = [
+ {
+ action = "allow"
+ cidr_block = "0.0.0.0/0"
+ from_port = 0
+ icmp_code = 0
+ icmp_type = 0
+ ipv6_cidr_block = ""
+ protocol = "6"
+ rule_no = 1
+ to_port = 65535
},
+ {
+ action = "allow"
+ cidr_block = "0.0.0.0/0"
+ from_port = 32768
+ icmp_code = 0
+ icmp_type = 0
+ ipv6_cidr_block = ""
+ protocol = "6"
+ rule_no = 130
+ to_port = 65535
},
+ {
+ action = "allow"
+ cidr_block = "10.2.0.0/16"
+ from_port = 0
+ icmp_code = -1
+ icmp_type = 0
+ ipv6_cidr_block = ""
+ protocol = "1"
+ rule_no = 141
+ to_port = 0
},
+ {
+ action = "allow"
+ cidr_block = "10.2.0.0/16"
+ from_port = 0
+ icmp_code = -1
+ icmp_type = 8
+ ipv6_cidr_block = ""
+ protocol = "1"
+ rule_no = 140
+ to_port = 0
},
+ {
+ action = "allow"
+ cidr_block = "10.2.0.0/16"
+ from_port = 22
+ icmp_code = 0
+ icmp_type = 0
+ ipv6_cidr_block = ""
+ protocol = "6"
+ rule_no = 120
+ to_port = 22
},
+ {
+ action = "allow"
+ cidr_block = "10.2.0.0/16"
+ from_port = 23
+ icmp_code = 0
+ icmp_type = 0
+ ipv6_cidr_block = ""
+ protocol = "6"
+ rule_no = 900
+ to_port = 65535
},
]
+ owner_id = (known after apply)
+ subnet_ids = (known after apply)
+ tags = {
+ "Department" = "SRE"
+ "Name" = "example-persistence"
+ "Project" = "example"
}
+ tags_all = {
+ "Department" = "SRE"
+ "Name" = "example-persistence"
+ "Project" = "example"
}
+ vpc_id = (known after apply)
}
# module.nacl.aws_network_acl.my_public_acl will be created
+ resource "aws_network_acl" "my_public_acl" {
+ arn = (known after apply)
+ egress = [
+ {
+ action = "allow"
+ cidr_block = "0.0.0.0/0"
+ from_port = 0
+ icmp_code = -1
+ icmp_type = -1
+ ipv6_cidr_block = ""
+ protocol = "1"
+ rule_no = 3
+ to_port = 0
},
+ {
+ action = "allow"
+ cidr_block = "0.0.0.0/0"
+ from_port = 0
+ icmp_code = 0
+ icmp_type = 0
+ ipv6_cidr_block = ""
+ protocol = "6"
+ rule_no = 1
+ to_port = 65535
},
+ {
+ action = "allow"
+ cidr_block = "0.0.0.0/0"
+ from_port = 1024
+ icmp_code = 0
+ icmp_type = 0
+ ipv6_cidr_block = ""
+ protocol = "6"
+ rule_no = 119
+ to_port = 65535
},
+ {
+ action = "allow"
+ cidr_block = "0.0.0.0/0"
+ from_port = 22
+ icmp_code = 0
+ icmp_type = 0
+ ipv6_cidr_block = ""
+ protocol = "6"
+ rule_no = 120
+ to_port = 22
},
+ {
+ action = "allow"
+ cidr_block = "0.0.0.0/0"
+ from_port = 443
+ icmp_code = 0
+ icmp_type = 0
+ ipv6_cidr_block = ""
+ protocol = "6"
+ rule_no = 110
+ to_port = 443
},
+ {
+ action = "allow"
+ cidr_block = "0.0.0.0/0"
+ from_port = 80
+ icmp_code = 0
+ icmp_type = 0
+ ipv6_cidr_block = ""
+ protocol = "6"
+ rule_no = 100
+ to_port = 80
},
+ {
+ action = "allow"
+ cidr_block = "10.2.0.0/16"
+ from_port = 0
+ icmp_code = -1
+ icmp_type = 0
+ ipv6_cidr_block = ""
+ protocol = "1"
+ rule_no = 141
+ to_port = 0
},
+ {
+ action = "allow"
+ cidr_block = "10.2.0.0/16"
+ from_port = 0
+ icmp_code = -1
+ icmp_type = 8
+ ipv6_cidr_block = ""
+ protocol = "1"
+ rule_no = 140
+ to_port = 0
},
]
+ id = (known after apply)
+ ingress = [
+ {
+ action = "allow"
+ cidr_block = "0.0.0.0/0"
+ from_port = 0
+ icmp_code = -1
+ icmp_type = -1
+ ipv6_cidr_block = ""
+ protocol = "1"
+ rule_no = 10
+ to_port = 0
},
+ {
+ action = "allow"
+ cidr_block = "0.0.0.0/0"
+ from_port = 0
+ icmp_code = 0
+ icmp_type = 0
+ ipv6_cidr_block = ""
+ protocol = "17"
+ rule_no = 1000
+ to_port = 65535
},
+ {
+ action = "allow"
+ cidr_block = "0.0.0.0/0"
+ from_port = 1024
+ icmp_code = 0
+ icmp_type = 0
+ ipv6_cidr_block = ""
+ protocol = "6"
+ rule_no = 999
+ to_port = 65535
},
+ {
+ action = "allow"
+ cidr_block = "0.0.0.0/0"
+ from_port = 22
+ icmp_code = 0
+ icmp_type = 0
+ ipv6_cidr_block = ""
+ protocol = "6"
+ rule_no = 50
+ to_port = 22
},
+ {
+ action = "allow"
+ cidr_block = "0.0.0.0/0"
+ from_port = 443
+ icmp_code = 0
+ icmp_type = 0
+ ipv6_cidr_block = ""
+ protocol = "6"
+ rule_no = 110
+ to_port = 443
},
+ {
+ action = "allow"
+ cidr_block = "0.0.0.0/0"
+ from_port = 80
+ icmp_code = 0
+ icmp_type = 0
+ ipv6_cidr_block = ""
+ protocol = "6"
+ rule_no = 100
+ to_port = 80
},
]
+ owner_id = (known after apply)
+ subnet_ids = (known after apply)
+ tags = {
+ "Department" = "SRE"
+ "Name" = "example-public"
+ "Project" = "example"
}
+ tags_all = {
+ "Department" = "SRE"
+ "Name" = "example-public"
+ "Project" = "example"
}
+ vpc_id = (known after apply)
}
# module.subnet.aws_subnet.subnets["my-application-ap-northeast-1a"] will be created
+ resource "aws_subnet" "subnets" {
+ arn = (known after apply)
+ assign_ipv6_address_on_creation = false
+ availability_zone = "ap-northeast-1a"
+ availability_zone_id = (known after apply)
+ cidr_block = "10.2.4.0/24"
+ enable_dns64 = false
+ enable_resource_name_dns_a_record_on_launch = false
+ enable_resource_name_dns_aaaa_record_on_launch = false
+ id = (known after apply)
+ ipv6_cidr_block_association_id = (known after apply)
+ ipv6_native = false
+ map_customer_owned_ip_on_launch = false
+ map_public_ip_on_launch = false
+ owner_id = (known after apply)
+ private_dns_hostname_type_on_launch = (known after apply)
+ tags = {
+ "Department" = "SRE"
+ "Name" = "my-application-ap-northeast-1a"
+ "Project" = "example"
}
+ tags_all = {
+ "Department" = "SRE"
+ "Name" = "my-application-ap-northeast-1a"
+ "Project" = "example"
}
+ vpc_id = (known after apply)
}
# module.subnet.aws_subnet.subnets["my-application-ap-northeast-1c"] will be created
+ resource "aws_subnet" "subnets" {
+ arn = (known after apply)
+ assign_ipv6_address_on_creation = false
+ availability_zone = "ap-northeast-1c"
+ availability_zone_id = (known after apply)
+ cidr_block = "10.2.5.0/24"
+ enable_dns64 = false
+ enable_resource_name_dns_a_record_on_launch = false
+ enable_resource_name_dns_aaaa_record_on_launch = false
+ id = (known after apply)
+ ipv6_cidr_block_association_id = (known after apply)
+ ipv6_native = false
+ map_customer_owned_ip_on_launch = false
+ map_public_ip_on_launch = false
+ owner_id = (known after apply)
+ private_dns_hostname_type_on_launch = (known after apply)
+ tags = {
+ "Department" = "SRE"
+ "Name" = "my-application-ap-northeast-1c"
+ "Project" = "example"
}
+ tags_all = {
+ "Department" = "SRE"
+ "Name" = "my-application-ap-northeast-1c"
+ "Project" = "example"
}
+ vpc_id = (known after apply)
}
# module.subnet.aws_subnet.subnets["my-application-ap-northeast-1d"] will be created
+ resource "aws_subnet" "subnets" {
+ arn = (known after apply)
+ assign_ipv6_address_on_creation = false
+ availability_zone = "ap-northeast-1d"
+ availability_zone_id = (known after apply)
+ cidr_block = "10.2.6.0/24"
+ enable_dns64 = false
+ enable_resource_name_dns_a_record_on_launch = false
+ enable_resource_name_dns_aaaa_record_on_launch = false
+ id = (known after apply)
+ ipv6_cidr_block_association_id = (known after apply)
+ ipv6_native = false
+ map_customer_owned_ip_on_launch = false
+ map_public_ip_on_launch = false
+ owner_id = (known after apply)
+ private_dns_hostname_type_on_launch = (known after apply)
+ tags = {
+ "Department" = "SRE"
+ "Name" = "my-application-ap-northeast-1d"
+ "Project" = "example"
}
+ tags_all = {
+ "Department" = "SRE"
+ "Name" = "my-application-ap-northeast-1d"
+ "Project" = "example"
}
+ vpc_id = (known after apply)
}
# module.subnet.aws_subnet.subnets["my-intra-ap-northeast-1a"] will be created
+ resource "aws_subnet" "subnets" {
+ arn = (known after apply)
+ assign_ipv6_address_on_creation = false
+ availability_zone = "ap-northeast-1a"
+ availability_zone_id = (known after apply)
+ cidr_block = "10.2.8.0/24"
+ enable_dns64 = false
+ enable_resource_name_dns_a_record_on_launch = false
+ enable_resource_name_dns_aaaa_record_on_launch = false
+ id = (known after apply)
+ ipv6_cidr_block_association_id = (known after apply)
+ ipv6_native = false
+ map_customer_owned_ip_on_launch = false
+ map_public_ip_on_launch = false
+ owner_id = (known after apply)
+ private_dns_hostname_type_on_launch = (known after apply)
+ tags = {
+ "Department" = "SRE"
+ "Name" = "my-intra-ap-northeast-1a"
+ "Project" = "example"
}
+ tags_all = {
+ "Department" = "SRE"
+ "Name" = "my-intra-ap-northeast-1a"
+ "Project" = "example"
}
+ vpc_id = (known after apply)
}
# module.subnet.aws_subnet.subnets["my-intra-ap-northeast-1c"] will be created
+ resource "aws_subnet" "subnets" {
+ arn = (known after apply)
+ assign_ipv6_address_on_creation = false
+ availability_zone = "ap-northeast-1c"
+ availability_zone_id = (known after apply)
+ cidr_block = "10.2.9.0/24"
+ enable_dns64 = false
+ enable_resource_name_dns_a_record_on_launch = false
+ enable_resource_name_dns_aaaa_record_on_launch = false
+ id = (known after apply)
+ ipv6_cidr_block_association_id = (known after apply)
+ ipv6_native = false
+ map_customer_owned_ip_on_launch = false
+ map_public_ip_on_launch = false
+ owner_id = (known after apply)
+ private_dns_hostname_type_on_launch = (known after apply)
+ tags = {
+ "Department" = "SRE"
+ "Name" = "my-intra-ap-northeast-1c"
+ "Project" = "example"
}
+ tags_all = {
+ "Department" = "SRE"
+ "Name" = "my-intra-ap-northeast-1c"
+ "Project" = "example"
}
+ vpc_id = (known after apply)
}
# module.subnet.aws_subnet.subnets["my-intra-ap-northeast-1d"] will be created
+ resource "aws_subnet" "subnets" {
+ arn = (known after apply)
+ assign_ipv6_address_on_creation = false
+ availability_zone = "ap-northeast-1d"
+ availability_zone_id = (known after apply)
+ cidr_block = "10.2.10.0/24"
+ enable_dns64 = false
+ enable_resource_name_dns_a_record_on_launch = false
+ enable_resource_name_dns_aaaa_record_on_launch = false
+ id = (known after apply)
+ ipv6_cidr_block_association_id = (known after apply)
+ ipv6_native = false
+ map_customer_owned_ip_on_launch = false
+ map_public_ip_on_launch = false
+ owner_id = (known after apply)
+ private_dns_hostname_type_on_launch = (known after apply)
+ tags = {
+ "Department" = "SRE"
+ "Name" = "my-intra-ap-northeast-1d"
+ "Project" = "example"
}
+ tags_all = {
+ "Department" = "SRE"
+ "Name" = "my-intra-ap-northeast-1d"
+ "Project" = "example"
}
+ vpc_id = (known after apply)
}
# module.subnet.aws_subnet.subnets["my-nat-server"] will be created
+ resource "aws_subnet" "subnets" {
+ arn = (known after apply)
+ assign_ipv6_address_on_creation = false
+ availability_zone = "ap-northeast-1d"
+ availability_zone_id = (known after apply)
+ cidr_block = "10.2.3.0/24"
+ enable_dns64 = false
+ enable_resource_name_dns_a_record_on_launch = false
+ enable_resource_name_dns_aaaa_record_on_launch = false
+ id = (known after apply)
+ ipv6_cidr_block_association_id = (known after apply)
+ ipv6_native = false
+ map_customer_owned_ip_on_launch = false
+ map_public_ip_on_launch = false
+ owner_id = (known after apply)
+ private_dns_hostname_type_on_launch = (known after apply)
+ tags = {
+ "Department" = "SRE"
+ "Name" = "my-nat-server"
+ "Project" = "example"
}
+ tags_all = {
+ "Department" = "SRE"
+ "Name" = "my-nat-server"
+ "Project" = "example"
}
+ vpc_id = (known after apply)
}
# module.subnet.aws_subnet.subnets["my-persistence-ap-northeast-1a"] will be created
+ resource "aws_subnet" "subnets" {
+ arn = (known after apply)
+ assign_ipv6_address_on_creation = false
+ availability_zone = "ap-northeast-1a"
+ availability_zone_id = (known after apply)
+ cidr_block = "10.2.16.0/24"
+ enable_dns64 = false
+ enable_resource_name_dns_a_record_on_launch = false
+ enable_resource_name_dns_aaaa_record_on_launch = false
+ id = (known after apply)
+ ipv6_cidr_block_association_id = (known after apply)
+ ipv6_native = false
+ map_customer_owned_ip_on_launch = false
+ map_public_ip_on_launch = false
+ owner_id = (known after apply)
+ private_dns_hostname_type_on_launch = (known after apply)
+ tags = {
+ "Department" = "SRE"
+ "Name" = "my-persistence-ap-northeast-1a"
+ "Project" = "example"
}
+ tags_all = {
+ "Department" = "SRE"
+ "Name" = "my-persistence-ap-northeast-1a"
+ "Project" = "example"
}
+ vpc_id = (known after apply)
}
# module.subnet.aws_subnet.subnets["my-persistence-ap-northeast-1c"] will be created
+ resource "aws_subnet" "subnets" {
+ arn = (known after apply)
+ assign_ipv6_address_on_creation = false
+ availability_zone = "ap-northeast-1c"
+ availability_zone_id = (known after apply)
+ cidr_block = "10.2.17.0/24"
+ enable_dns64 = false
+ enable_resource_name_dns_a_record_on_launch = false
+ enable_resource_name_dns_aaaa_record_on_launch = false
+ id = (known after apply)
+ ipv6_cidr_block_association_id = (known after apply)
+ ipv6_native = false
+ map_customer_owned_ip_on_launch = false
+ map_public_ip_on_launch = false
+ owner_id = (known after apply)
+ private_dns_hostname_type_on_launch = (known after apply)
+ tags = {
+ "Department" = "SRE"
+ "Name" = "my-persistence-ap-northeast-1c"
+ "Project" = "example"
}
+ tags_all = {
+ "Department" = "SRE"
+ "Name" = "my-persistence-ap-northeast-1c"
+ "Project" = "example"
}
+ vpc_id = (known after apply)
}
# module.subnet.aws_subnet.subnets["my-persistence-ap-northeast-1d"] will be created
+ resource "aws_subnet" "subnets" {
+ arn = (known after apply)
+ assign_ipv6_address_on_creation = false
+ availability_zone = "ap-northeast-1d"
+ availability_zone_id = (known after apply)
+ cidr_block = "10.2.18.0/24"
+ enable_dns64 = false
+ enable_resource_name_dns_a_record_on_launch = false
+ enable_resource_name_dns_aaaa_record_on_launch = false
+ id = (known after apply)
+ ipv6_cidr_block_association_id = (known after apply)
+ ipv6_native = false
+ map_customer_owned_ip_on_launch = false
+ map_public_ip_on_launch = false
+ owner_id = (known after apply)
+ private_dns_hostname_type_on_launch = (known after apply)
+ tags = {
+ "Department" = "SRE"
+ "Name" = "my-persistence-ap-northeast-1d"
+ "Project" = "example"
}
+ tags_all = {
+ "Department" = "SRE"
+ "Name" = "my-persistence-ap-northeast-1d"
+ "Project" = "example"
}
+ vpc_id = (known after apply)
}
# module.subnet.aws_subnet.subnets["my-public-ap-northeast-1a"] will be created
+ resource "aws_subnet" "subnets" {
+ arn = (known after apply)
+ assign_ipv6_address_on_creation = false
+ availability_zone = "ap-northeast-1a"
+ availability_zone_id = (known after apply)
+ cidr_block = "10.2.0.0/24"
+ enable_dns64 = false
+ enable_resource_name_dns_a_record_on_launch = false
+ enable_resource_name_dns_aaaa_record_on_launch = false
+ id = (known after apply)
+ ipv6_cidr_block_association_id = (known after apply)
+ ipv6_native = false
+ map_customer_owned_ip_on_launch = false
+ map_public_ip_on_launch = false
+ owner_id = (known after apply)
+ private_dns_hostname_type_on_launch = (known after apply)
+ tags = {
+ "Department" = "SRE"
+ "Name" = "my-public-ap-northeast-1a"
+ "Project" = "example"
}
+ tags_all = {
+ "Department" = "SRE"
+ "Name" = "my-public-ap-northeast-1a"
+ "Project" = "example"
}
+ vpc_id = (known after apply)
}
# module.subnet.aws_subnet.subnets["my-public-ap-northeast-1c"] will be created
+ resource "aws_subnet" "subnets" {
+ arn = (known after apply)
+ assign_ipv6_address_on_creation = false
+ availability_zone = "ap-northeast-1c"
+ availability_zone_id = (known after apply)
+ cidr_block = "10.2.1.0/24"
+ enable_dns64 = false
+ enable_resource_name_dns_a_record_on_launch = false
+ enable_resource_name_dns_aaaa_record_on_launch = false
+ id = (known after apply)
+ ipv6_cidr_block_association_id = (known after apply)
+ ipv6_native = false
+ map_customer_owned_ip_on_launch = false
+ map_public_ip_on_launch = false
+ owner_id = (known after apply)
+ private_dns_hostname_type_on_launch = (known after apply)
+ tags = {
+ "Department" = "SRE"
+ "Name" = "my-public-ap-northeast-1c"
+ "Project" = "example"
}
+ tags_all = {
+ "Department" = "SRE"
+ "Name" = "my-public-ap-northeast-1c"
+ "Project" = "example"
}
+ vpc_id = (known after apply)
}
# module.subnet.aws_subnet.subnets["my-public-ap-northeast-1d"] will be created
+ resource "aws_subnet" "subnets" {
+ arn = (known after apply)
+ assign_ipv6_address_on_creation = false
+ availability_zone = "ap-northeast-1d"
+ availability_zone_id = (known after apply)
+ cidr_block = "10.2.2.0/24"
+ enable_dns64 = false
+ enable_resource_name_dns_a_record_on_launch = false
+ enable_resource_name_dns_